Meta-analysis of avian responses and butterfly eyespots

Author

Mizuno et al.

Published

September 15, 2023

1 Setting up

Load packages and a custom function. The custom function is used for calculating effect sizes (lnRR) and their variances from raw data.

Code
pacman::p_load(ape,
               GoodmanKruskal,
               DT,
               ggokabeito,
               ggcorrplot,
               here, 
               knitr,
               tidyverse,
               patchwork,
               metafor,
               miWQS,
               orchaRd,
               parallel)

# function for calculating effect size (lnRR)
#
# effect_lnRR() computes the log response ratio (lnRR) and its sampling
# variance for every row of `dt` and returns the data with two new columns,
# `lnRR` and `lnRR_var`.  Rows are handled according to:
#   * Response == "continuous":  escalc() with measure "ROM" (independent
#     designs) or "ROMC" (dependent designs, assuming r = 0.5).
#   * Response == "proportion1": proportions without SDs; arcsine-square-root
#     transformed, variance via the delta method.
#   * Response == "proportion2": proportions with reported SDs; the SDs are
#     first converted to the arcsine scale.
# Rows whose Response/Study_design combination matches no branch keep
# lnRR = NA, exactly as in the original implementation.
effect_lnRR <- function(dt) {
  # Replace zeros in "C_mean", "T_sd", "C_sd", "C_proportion" with small
  # placeholder values so that log-ratios and variances stay defined, and
  # pull extreme "T_proportion" values away from the 0/1 boundaries.
  # NOTE(review): the constants (0.04, 0.01, 0.05, 0.9) look dataset-specific
  # (each variable's minimum observed value) -- confirm against the raw data.
  dt <- dt %>%
    mutate(
      C_mean = ifelse(C_mean == 0, 0.04, C_mean),
      T_sd = ifelse(T_sd == 0, 0.01, T_sd),
      C_sd = ifelse(C_sd == 0, 0.05, C_sd),
      C_proportion = ifelse(C_proportion == 0, 0.01, C_proportion),
      T_proportion = ifelse(T_proportion < 0.01, 0.01, T_proportion),
      T_proportion = ifelse(T_proportion == 1, 0.9, T_proportion)
    )

  # Copy the dataset and pre-create numeric effect-size columns
  # (lnRR / lnRR_var) to be filled row by row below.
  dt1 <- dt %>%
    mutate(
      lnRR = NA_real_,
      lnRR_var = NA_real_
    )

  # Arcsine-square-root transformation for proportions; defined once here
  # instead of being redefined inside every branch of the loop.
  asin_trans <- function(proportion) asin(sqrt(proportion))

  for (i in seq_len(nrow(dt1))) {
    Tn <- dt1$Tn[i]
    Cn <- dt1$Cn[i]
    T_mean <- dt1$T_mean[i]
    C_mean <- dt1$C_mean[i]
    T_proportion <- dt1$T_proportion[i]
    C_proportion <- dt1$C_proportion[i]
    T_sd <- dt1$T_sd[i]
    C_sd <- dt1$C_sd[i]
    Response <- dt1$Response[i]
    Study_design <- dt1$Study_design[i]
    Direction <- dt1$Direction[i]

    # For proportion outcomes, mirror "Decrease" responses around 1 so that
    # larger values always mean a stronger response to the stimulus.
    # (Previously duplicated in all four proportion branches.)
    if (Response %in% c("proportion1", "proportion2")) {
      T_proportion <- replace(
        T_proportion, Direction == "Decrease",
        (1 - T_proportion[Direction == "Decrease"])
      )
      C_proportion <- replace(
        C_proportion, Direction == "Decrease",
        (1 - C_proportion[Direction == "Decrease"])
      )
    }

    # continuous data - using escalc() (metafor package)
    if (Response == "continuous" && Study_design == "independent") {
      # Independent groups: plain log response ratio.
      # Fix: the control-group mean argument is `m2i`; the original `m2 =`
      # only worked through R's partial argument matching.
      effect <- escalc(
        measure = "ROM",
        n1i = Tn, n2i = Cn,
        m1i = T_mean, m2i = C_mean,
        sd1i = T_sd, sd2i = C_sd,
        var.names = c("lnRR", "lnRR_var")
      )

      dt1$lnRR[i] <- effect$lnRR
      dt1$lnRR_var[i] <- effect$lnRR_var
    } else if (Response == "continuous" && Study_design == "dependent") {
      # Dependent (paired) design: lnRR for correlated groups, assuming a
      # treatment-control correlation of ri = 0.5.
      effect <- escalc(
        measure = "ROMC",
        ni = (Tn + Cn) / 2,
        m1i = T_mean, m2i = C_mean,
        sd1i = T_sd, sd2i = C_sd,
        ri = 0.5,
        var.names = c("lnRR", "lnRR_var")
      )

      dt1$lnRR[i] <- effect$lnRR
      dt1$lnRR_var[i] <- effect$lnRR_var
    } else if (Response == "proportion1" &&
               Study_design %in% c("independent", "dependent")) {
      # Proportion data without SD values: arcsine-transform the proportions
      # and use the delta-method variance.  Dependent designs subtract a
      # covariance term assuming r = 0.5.
      T_trans <- asin_trans(T_proportion)
      C_trans <- asin_trans(C_proportion)

      v_T <- (1 / sqrt(8))^2 * (1 / (T_trans^2 * Tn))
      v_C <- (1 / sqrt(8))^2 * (1 / (C_trans^2 * Cn))

      dt1$lnRR[i] <- log(T_trans / C_trans)
      dt1$lnRR_var[i] <- if (Study_design == "dependent") {
        v_T + v_C - 2 * 0.5 * sqrt(v_T) * sqrt(v_C)
      } else {
        v_T + v_C
      }
    } else if (Response == "proportion2" &&
               Study_design %in% c("independent", "dependent")) {
      # Proportion data with reported SD values: first convert the SDs to
      # the arcsine scale (delta method, using the untransformed, already
      # direction-corrected proportions), then proceed as above.
      T_SD <- sqrt(T_sd^2 / (4 * T_proportion * (1 - T_proportion)))
      C_SD <- sqrt(C_sd^2 / (4 * C_proportion * (1 - C_proportion)))

      T_trans <- asin_trans(T_proportion)
      C_trans <- asin_trans(C_proportion)

      v_T <- T_SD^2 * (1 / (T_trans^2 * Tn))
      v_C <- C_SD^2 * (1 / (C_trans^2 * Cn))

      dt1$lnRR[i] <- log(T_trans / C_trans)
      dt1$lnRR_var[i] <- if (Study_design == "dependent") {
        v_T + v_C - 2 * 0.5 * sqrt(v_T) * sqrt(v_C)
      } else {
        v_T + v_C
      }
    }
  }

  return(dt1)
}

2 Prepare datasets for analysis

First, we used a predator dataset to check whether we need to consider phylogeny. If so, we will use the predator and prey datasets for the following meta-analysis.

Code
# Load the raw predator and combined datasets.
# fileEncoding = "CP932": files were saved in a Japanese Windows encoding.
# NOTE(review): prefer TRUE over T for `header`.
dat_predator <- read.csv(here("data/predator_22072023.csv"), header = T, fileEncoding = "CP932")

dat_all <-  read.csv(here("data/all_15082023.csv"), header = T, fileEncoding = "CP932")


# Interactive table of the predator dataset.
datatable(dat_predator, caption = "Predator datasets", options = list(pageLength = 10, scrollX = TRUE))

Table 1 - Predator datasets

Code
# Read the BirdTree phylogenies (NEXUS; multiPhylo object).
trees <- read.nexus(here("data/bird_phy.nex"))
# NOTE(review): `trees[1]` is a length-1 multiPhylo; `trees[[1]]` extracts
# the single tree itself -- both plot, but [[1]] is the idiomatic choice.
plot(trees[1])

Figure 1— Phylogenetic tree of bird species
Code
# Compute effect sizes (lnRR / lnRR_var) for both datasets.
dat_pred <- effect_lnRR(dat_predator)
dat <- effect_lnRR(dat_all)

# add observation id (one unique level per effect size / row)
dat_pred$Obs_ID <- 1:nrow(dat_pred)
dat$Obs_ID <- 1:nrow(dat)

# Quick look at the distribution of effect sizes.
hist(dat$lnRR)

Code
hist(dat$lnRR_var)

Code
# Diameter, area, and background area are log-transformed because their raw
# distributions are strongly right-skewed.
dat$Log_diameter <- log(dat$Diameter_pattern)
dat$Log_area <- log(dat$Area_pattern)
dat$Log_background <- log(dat$Area_background)

# Variance-covariance matrices of the sampling errors (metafor::vcalc),
# allowing for non-independence of effect sizes within cohorts.
# NOTE(review): VCV_pred uses `subgroup = Obs_ID`, which makes every
# observation its own subgroup and hence yields an effectively diagonal
# matrix, whereas VCV below uses `obs = Obs_ID` with rho = 0.5 (within-cohort
# covariance).  Confirm this difference between the two calls is intended.
VCV_pred <- vcalc(vi = lnRR_var, 
                  cluster = Cohort_ID, 
                  subgroup = Obs_ID, 
                  data = dat_pred)

VCV <- vcalc(vi = lnRR_var, 
             cluster = Cohort_ID,
             obs = Obs_ID,
             rho = 0.5,
             data = dat)

We could not attach a caption to a datatable()-formatted table; a kable()-formatted table may be needed instead.

Code
datatable(dat, caption = "Dataset for meta-analysis", 
          options = list(pageLength = 10, scrollX = TRUE))

Table 2 - Dataset for meta-analysis

If phylogeny does not explain much of the heterogeneity, we will not need to consider it and the two datasets can be merged.

We used meta-analytical models to calculate total I2 (a measure of heterogeneity not caused by sampling error; Higgins. et al., 2003) and the partial I2 explained by each random factor.

Based on this, we do not need to consider phylogenetic relatedness in our meta-regressions, as this random factor did not explain much of the heterogeneity between effect sizes (partial I2 < 0.001%); we can therefore combine the two datasets (predator and prey) for the meta-analysis.

Code
# Fit the full phylogenetic multilevel meta-analysis for one candidate
# phylogenetic correlation matrix (one tree).
# Relies on globals: VCV_pred and dat_pred.
# NOTE(review): the default `cor_tree = vcv_tree` refers to an object that is
# not defined anywhere in this document (only vcv_tree_50 exists); it is never
# triggered because mclapply always supplies cor_tree -- confirm or drop it.
phy_model <- function(cor_tree = vcv_tree){
            model <- rma.mv(yi = lnRR,
                            V = VCV_pred,
                            random = list(~1 | Study_ID,
                                          ~1 | Shared_control_ID,
                                          ~1 | Cohort_ID,
                                          ~1 | Obs_ID,
                                          ~1 | Bird_species,
                                          ~1 | Phylogeny),
                            R = list(Phylogeny = cor_tree),
                            test = "t",
                            method = "REML",
                            sparse = TRUE,
                            data = dat_pred)
  model
}

# Use the first 50 trees (as in Nakagawa & de Villemereuil 2019) and convert
# each one into a phylogenetic correlation matrix.
tree_50 <- trees[1:50]
vcv_tree_50 <- map(tree_50, ~vcv(.x, corr = TRUE))

# Run multiple meta-analyses with 50 different trees and obtain the combined result

# NOTE(review): mclapply forks, so mc.cores = 8 runs sequentially on Windows
# and assumes >= 8 cores elsewhere.
ma_50 <- mclapply(vcv_tree_50, phy_model, mc.cores = 8) 
Code
# Reload the cached model fits (the mclapply run above is expensive).
ma_50 <- readRDS(here("Rdata", "ma_50.RDS"))

# combining the results: per-tree intercept estimates and standard errors
est_50 <- map_dbl(ma_50, ~ .x$b[[1]])
se_50 <-  map_dbl(ma_50, ~ .x$se)
# Interleave as (est, se) pairs, one pair per tree.
df_50 <- c(rbind(est_50, se_50))

# creating an array required by pool.mi: 1 parameter x (est, se) x 50 trees
my_array <- array(df_50, dim = c(1, 2, 50))

# Pool across the 50 trees as if they were multiple imputations
# (Rubin's rules via miWQS::pool.mi).
com_res <- round(pool.mi(my_array), 4)
Code
knitr::kable(com_res, caption = "Combined result of 50 phylogenetic trees")
Combined result of 50 phylogenetic trees
pooled.mean pooled.total.se se.within se.between relative.inc.var frac.miss.info CI.1 CI.2 p.value
0.1394 0.1192 0.1192 0 0 0 -0.0943 0.3731 0.2424
Code
# Average each variance component (sigma^2) across the 50 tree-specific fits
# and display the result as a table.
component_names <- c("sigma^2.1_Study_ID", "sigma^2.2_SharedControl_ID",
                     "sigma^2.3_Cohort_ID", "sigma^2.4_Obs_ID",
                     "sigma^2.5_BirdSpecies", "sigma^2.6_Phylogeny")

# One row per variance component, one column per fitted model.
sigma2_by_fit <- vapply(ma_50, function(fit) fit$sigma2,
                        numeric(length(component_names)))

sigma2_mod <- round(rowMeans(sigma2_by_fit), 4)
names(sigma2_mod) <- component_names

knitr::kable(sigma2_mod, caption = "Average variance components")
Average variance components
x
sigma^2.1_Study_ID 0.0000
sigma^2.2_SharedControl_ID 0.0923
sigma^2.3_Cohort_ID 0.1009
sigma^2.4_Obs_ID 0.5323
sigma^2.5_BirdSpecies 0.0000
sigma^2.6_Phylogeny 0.0000
We got 1000 phylogenetic trees from BirdTree.org

Only 50 trees are used as in Nakagawa & Villemereuil (2019)

3 Meta-analysis

We used meta-analytical models to calculate total I2 (a measure of heterogeneity not caused by sampling error; Higgins. et al., 2003) and the partial I2 explained by each random factor.

Which random effects should be removed from our models?

We can remove Shared control ID and Cohort ID

Code
# Intercept-only meta-analytic model with the full random-effects structure.
# NOTE(review): an earlier comment here claimed Cohort_ID was excluded, but
# Cohort_ID (and Shared_control_ID) are still in the random-effects list;
# the output below shows their variance estimates are ~0, which is what
# justifies dropping them in the later meta-regressions.
ma_all <- rma.mv(yi = lnRR,
                  V = VCV, 
                  random = list(~1 | Study_ID,
                                ~1 | Shared_control_ID,
                                ~1 | Cohort_ID,
                                ~1 | Obs_ID),
                  test = "t",
                  method = "REML", 
                  sparse = TRUE,
                  data = dat)

summary(ma_all)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-248.5672   497.1344   507.1344   524.9761   507.3688   

Variance Components:

            estim    sqrt  nlvls  fixed             factor 
sigma^2.1  0.0495  0.2224     32     no           Study_ID 
sigma^2.2  0.0000  0.0000     88     no  Shared_control_ID 
sigma^2.3  0.0000  0.0000    157     no          Cohort_ID 
sigma^2.4  0.2433  0.4932    263     no             Obs_ID 

Test for Heterogeneity:
Q(df = 262) = 2726.4404, p-val < .0001

Model Results:

estimate      se    tval   df    pval   ci.lb   ci.ub      
  0.2077  0.0600  3.4593  262  0.0006  0.0895  0.3259  *** 

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Total/partial I2 and marginal/conditional R2 (orchaRd helpers).
i2 <- round(i2_ml(ma_all), 4)
r2 <- round(r2_ml(ma_all), 4)
I2_Total I2_Study_ID I2_Shared_control_ID I2_Cohort_ID I2_Obs_ID
96.6694 16.3369 0 0 80.3326
R2_marginal R2_conditional
0 0.169

Table 3 - Heterogeneity of effect size

Code
# Orchard plot of the overall effect (mean estimate, CI, prediction interval).
orchard_plot(ma_all,
              group = "Study_ID",
              xlab = "log response ratio (lnRR)", 
              angle = 45) + 
              scale_x_discrete(labels = c("Overall effect")) + 
              scale_colour_okabe_ito()+
              scale_fill_okabe_ito()

Figure 2— Estimates of overall effect size and 95% confidence intervals

4 Meta-regressions: uni-moderator

Eyespot vs. conspicuous patterns - Is there a significant difference in effect size between the two pattern types?

Code
#normal model
mr_eyespot <- rma.mv(yi = lnRR,
                    V = VCV, 
                    mods = ~ Treatment_stimulus,
                    random = list(~1 | Study_ID,
                                  ~1 | Obs_ID),
                    test = "t",
                    method = "REML", 
                    sparse = TRUE,
                    data = dat)

summary(mr_eyespot)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-247.7885   495.5769   503.5769   517.8350   503.7332   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0539  0.2321     32     no  Study_ID 
sigma^2.2  0.2436  0.4935    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 2715.7394, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 0.1562, p-val = 0.6930

Model Results:

                           estimate      se    tval   df    pval    ci.lb 
intrcpt                      0.1779  0.0984  1.8082  261  0.0717  -0.0158 
Treatment_stimuluseyespot    0.0435  0.1100  0.3953  261  0.6930  -0.1732 
                            ci.ub    
intrcpt                    0.3717  . 
Treatment_stimuluseyespot  0.2601    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
r2_1 <- round(r2_ml(mr_eyespot), 4)
R2_marginal R2_conditional
0.0013 0.1822

Table XX. Model goodness-of-fit

Code
# Orchard plot of effect sizes by treatment stimulus (eyespot vs. conspicuous).
orchard_plot(mr_eyespot,
            mod = "Treatment_stimulus",
            group = "Study_ID",
            xlab = "log response ratio (lnRR)",
            angle = 45) + 
            scale_colour_okabe_ito(order = 2:3)+
            scale_fill_okabe_ito(order = 2:3)

Figure 3— Eyespot vs. conspicuous patterns
Code
# intercept-removed model
mr_eyespot1 <- rma.mv(yi = lnRR,
                      V = VCV, 
                      mods = ~ Treatment_stimulus -1,
                      random = list(~1 | Study_ID,
                                    ~1 | Obs_ID),
                      test = "t",
                      method = "REML", 
                      sparse = TRUE,
                      data = dat)

summary(mr_eyespot1)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-247.7885   495.5769   503.5769   517.8350   503.7332   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0539  0.2321     32     no  Study_ID 
sigma^2.2  0.2436  0.4935    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 2715.7394, p-val < .0001

Test of Moderators (coefficients 1:2):
F(df1 = 2, df2 = 261) = 5.8135, p-val = 0.0034

Model Results:

                               estimate      se    tval   df    pval    ci.lb 
Treatment_stimulusconspicuous    0.1779  0.0984  1.8082  261  0.0717  -0.0158 
Treatment_stimuluseyespot        0.2214  0.0699  3.1679  261  0.0017   0.0838 
                                ci.ub     
Treatment_stimulusconspicuous  0.3717   . 
Treatment_stimuluseyespot      0.3590  ** 

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Meta-regression: effect of (log-transformed) pattern diameter.
mr_diameter <- rma.mv(yi = lnRR,
                      V = VCV, 
                      mods = ~ Log_diameter,
                      random = list(~1 | Study_ID,
                                    ~1 | Obs_ID),
                      test = "t",
                      method = "REML", 
                      sparse = TRUE,
                      data = dat)

summary(mr_diameter)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-244.7875   489.5749   497.5749   511.8330   497.7312   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0448  0.2116     32     no  Study_ID 
sigma^2.2  0.2383  0.4881    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 2716.5292, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 5.9052, p-val = 0.0158

Model Results:

              estimate      se     tval   df    pval    ci.lb   ci.ub    
intrcpt        -0.1667  0.1643  -1.0143  261  0.3114  -0.4903  0.1569    
Log_diameter    0.1937  0.0797   2.4301  261  0.0158   0.0367  0.3507  * 

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
r2_2 <- round(r2_ml(mr_diameter), 4)
R2_marginal R2_conditional
0.0658 0.2137

Table XX. Model goodness-of-fit

Code
# Bubble plot: lnRR against log diameter (point size ~ precision).
bubble_plot(mr_diameter,
            mod = "Log_diameter",
            group = "Study_ID",
            xlab = "Log-transformed diameter")

Code
# Meta-regression: effect of (log-transformed) pattern area.
mr_area <- rma.mv(yi = lnRR,
                  V = VCV, 
                  mods = ~ Log_area,
                  random = list(~1 | Study_ID,
                                ~1 | Obs_ID),
                  test = "t",
                  method = "REML", 
                  sparse = TRUE,
                  data = dat)

summary(mr_area)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-244.2677   488.5354   496.5354   510.7935   496.6916   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0491  0.2216     32     no  Study_ID 
sigma^2.2  0.2362  0.4860    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 2713.1669, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 6.8516, p-val = 0.0094

Model Results:

          estimate      se     tval   df    pval    ci.lb   ci.ub     
intrcpt    -0.1841  0.1610  -1.1437  261  0.2538  -0.5010  0.1329     
Log_area    0.1103  0.0421   2.6176  261  0.0094   0.0273  0.1933  ** 

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
r2_3 <- round(r2_ml(mr_area), 4)
R2_marginal R2_conditional
0.0816 0.2396

Table XX. Model goodness-of-fit

Code
# Bubble plot: lnRR against log area.
bubble_plot(mr_area,
            mod = "Log_area",
            group = "Study_ID",
            xlab = "Log-transformed area")

Code
# Meta-regression: effect of the number of patterns (untransformed count).
mr_num <- rma.mv(yi = lnRR,
                  V = VCV,
                  mods = ~ Number_pattern,
                  random = list(~1 | Study_ID,
                                ~1 | Obs_ID),
                      test = "t",
                      method = "REML",
                      sparse = TRUE,
                      data = dat)

summary(mr_num)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-245.9910   491.9821   499.9821   514.2402   500.1383   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0473  0.2176     32     no  Study_ID 
sigma^2.2  0.2395  0.4894    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 2703.3403, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 4.4221, p-val = 0.0364

Model Results:

                estimate      se     tval   df    pval    ci.lb    ci.ub      
intrcpt           0.3444  0.0881   3.9111  261  0.0001   0.1710   0.5178  *** 
Number_pattern   -0.0578  0.0275  -2.1029  261  0.0364  -0.1120  -0.0037    * 

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
r2_4 <- round(r2_ml(mr_num), 4)
R2_marginal R2_conditional
0.0195 0.1813

Table XX. Model goodness-of-fit

Code
# Bubble plot: lnRR against number of patterns.
bubble_plot(mr_num,
            mod = "Number_pattern",
            group = "Study_ID",
            xlab = "Number of patterns") 

Our dataset includes two types of stimuli: live and artificial. Is there a significant difference in effect size between the two stimulus types?

Code
# normal model
mr_prey_type <- rma.mv(yi = lnRR,
                  V = VCV, 
                  mods = ~ Type_prey,
                  random = list(~1 | Study_ID,
                                ~1 | Obs_ID),
                  test = "t",
                  method = "REML", 
                  sparse = TRUE,
                  data = dat)

summary(mr_prey_type)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-247.6124   495.2249   503.2249   517.4830   503.3811   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0571  0.2390     32     no  Study_ID 
sigma^2.2  0.2424  0.4923    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 2687.4935, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 0.3362, p-val = 0.5625

Model Results:

               estimate      se    tval   df    pval    ci.lb   ci.ub    
intrcpt          0.1834  0.0762  2.4061  261  0.0168   0.0333  0.3334  * 
Type_preyreal    0.0772  0.1331  0.5798  261  0.5625  -0.1849  0.3393    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
r2_5 <- round(r2_ml(mr_prey_type), 4)
R2_marginal R2_conditional
0.0035 0.1935

Table XX. Model goodness-of-fit

Code
# Orchard plot of effect sizes by prey type (artificial vs. real).
orchard_plot(mr_prey_type,
              mod = "Type_prey",
              group = "Study_ID",
              xlab = "Type of prey",
              angle = 45) +
            scale_colour_okabe_ito(order = 4:5)+
            scale_fill_okabe_ito(order = 4:5)

Code
# intercept-removed model
mr_prey_type1 <- rma.mv(yi = lnRR,
                        V = VCV, 
                        mods = ~ Type_prey -1,
                        random = list(~1 | Study_ID,
                                      ~1 | Obs_ID),
                        test = "t",
                        method = "REML", 
                        sparse = TRUE,
                        data = dat)

summary(mr_prey_type1)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-247.6124   495.2249   503.2249   517.4830   503.3811   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0571  0.2390     32     no  Study_ID 
sigma^2.2  0.2424  0.4923    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 2687.4935, p-val < .0001

Test of Moderators (coefficients 1:2):
F(df1 = 2, df2 = 261) = 5.7450, p-val = 0.0036

Model Results:

                     estimate      se    tval   df    pval   ci.lb   ci.ub    
Type_preyartificial    0.1834  0.0762  2.4061  261  0.0168  0.0333  0.3334  * 
Type_preyreal          0.2605  0.1091  2.3876  261  0.0177  0.0457  0.4754  * 

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Our dataset includes four prey shapes: butterfly, abstract butterfly, abstract stimuli, and caterpillar. Is there a significant difference in effect size among these prey shapes?

Code
# Meta-regression: prey shape (intercept-removed, so each of the four shape
# levels gets its own mean estimate).
# NOTE(review): the test/method/sparse/data arguments are indented as if
# nested inside `random`; they are still top-level arguments -- reformat.
mr_prey_shape <- rma.mv(yi = lnRR,
                        V = VCV, 
                        mods = ~ Shape_prey -1,
                        random = list(~1 | Study_ID,
                                      ~1 | Obs_ID),
                                      test = "t",
                                      method = "REML", 
                                      sparse = TRUE,
                                      data = dat)

summary(mr_prey_shape)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-243.8328   487.6657   499.6657   521.0067   499.9990   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0562  0.2371     32     no  Study_ID 
sigma^2.2  0.2399  0.4898    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 259) = 2494.5220, p-val < .0001

Test of Moderators (coefficients 1:4):
F(df1 = 4, df2 = 259) = 3.7295, p-val = 0.0057

Model Results:

                              estimate      se    tval   df    pval    ci.lb 
Shape_preyabstract_butterfly    0.3223  0.1080  2.9841  259  0.0031   0.1096 
Shape_preyabstract_stimuli      0.0126  0.1879  0.0671  259  0.9465  -0.3574 
Shape_preybutterfly             0.2600  0.1085  2.3964  259  0.0173   0.0463 
Shape_preycaterpillar           0.0663  0.1286  0.5160  259  0.6063  -0.1868 
                               ci.ub     
Shape_preyabstract_butterfly  0.5349  ** 
Shape_preyabstract_stimuli    0.3826     
Shape_preybutterfly           0.4737   * 
Shape_preycaterpillar         0.3195     

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Post-hoc pairwise comparisons between prey shapes (multcomp::glht) are
# left disabled -- presumably not run in the final analysis; confirm.
# library(multcomp)
# # Perform post-hoc tests on the estimated coefficients
# mc <- glht(mr_prey_shape, linfct = mcp(Shape_prey = "Tukey"))

# Goodness-of-fit for the prey-shape model.
r2_6 <- round(r2_ml(mr_prey_shape), 4)
R2_marginal R2_conditional
0.0542 0.2338

Table XX. Model goodness-of-fit

Code
# Orchard plot of effect sizes by prey shape (four levels).
orchard_plot(mr_prey_shape,
              mod = "Shape_prey",
              group = "Study_ID",
              xlab = "Shape of prey",
              angle = 45) +
              scale_colour_okabe_ito(order = 6:9)+
              scale_fill_okabe_ito(order = 6:9)

Figure 4— Effect size and prey shape

5 Correlation visualisation and choose moderators

Before we run multi-moderator meta-regressions, we need to consider the correlation between moderators. Area, diameter and background seem to be correlated. We visualised the correlation between these variables.

Code
# correlation between continuous variables
corr_cont <- round(cor(dat[, c("Diameter_pattern", "Area_pattern", "Number_pattern", "Area_background")]),2)

p1 <- ggcorrplot(corr_cont, hc.order = TRUE, lab = TRUE, outline.col = "white", colors = c("#6D9EC1", "white", "#E46726"), title = "(a) Continuous variables")

corr_cont_log <- round(cor(dat[, c("Log_diameter", "Log_area", "Number_pattern", "Log_background")]),2)

p2 <- ggcorrplot(corr_cont_log, hc.order = TRUE, lab = TRUE, outline.col = "white", colors = c("#6D9EC1", "white", "#E46726"), title = "(b) Log-transormed continuous variables")

p1 + p2 +plot_layout(guides = 'collect')

Figure 5— Correlation between continuous moderators
Code
# Association between the categorical moderators, measured with
# Goodman-Kruskal tau (asymmetric association for nominal variables).
dat1 <- dat %>%
  select(Treatment_stimulus, Type_prey, Shape_prey)

corr_cat <- GKtauDataframe(dat1)
plot(corr_cat)

Figure 6— Correlation between categorical variables

We should not include “Shape_prey” and “Type_prey” in the model at the same time, because they are correlated.

We used model R2 values to find the better model and moderator VIF values to check multicollinearity between moderators. A higher R2 indicates a better model, and VIF > 2 indicates multicollinearity.

5.0.1 R2

Code
# Compare area vs. diameter as the size moderator (each combined with the
# number of patterns); the model with the higher R2 wins.
r2_area <- rma.mv(yi = lnRR,
                  V = VCV,
                  mods = ~ Log_area + Number_pattern,
                  random = list(~1 | Study_ID,
                                ~1 | Obs_ID),
                  test = "t",
                  method = "REML",
                  sparse = TRUE,
                  data = dat)

r2_diameter <- rma.mv(yi = lnRR,
                      V = VCV,
                      mods = ~ Log_diameter + Number_pattern,
                      random = list(~1 | Study_ID,
                                    ~1 | Obs_ID),
                      test = "t",
                      method = "REML",
                      sparse = TRUE,
                      data = dat)

# check r2 values
r2_ml(r2_area)
   R2_marginal R2_conditional 
    0.07553592     0.22788272 
Code
r2_ml(r2_diameter)
   R2_marginal R2_conditional 
    0.06439709     0.20868706 

It seems area is a better predictor than diameter.

Code
# Multicollinearity check: fit the full moderator set and inspect VIFs.
vif_1 <- rma.mv(yi = lnRR,
                V = VCV,
                mods = ~ Log_area + Number_pattern + 
                        Treatment_stimulus + Type_prey + Shape_prey,
                random = list(~1 | Study_ID,
                              ~1 | Obs_ID), 
                test = "t",
                method = "REML",
                sparse = TRUE,
                data = dat)

# we get Warning message:
# Redundant predictors dropped from the model. 

vif.rma(vif_1)

Log_area  Number_pattern  Treatment_stimuluseyespot  Type_preyreal  Shape_preyabstract_stimuli  Shape_preycaterpillar 
  2.8654          1.2704                     1.4929         1.8781                      3.0207                 1.4596 

Shape_prey has high VIF value. We removed it from the model and run new model.

Code
# Re-check VIFs after removing Shape_prey (which had VIF > 2 above).
vif_2 <- rma.mv(yi = lnRR,
                V = VCV,
                mods = ~ Log_area + Number_pattern + 
                        Treatment_stimulus + Type_prey,
                random = list(~1 | Study_ID,
                              ~1 | Obs_ID), 
                test = "t",
                method = "REML",
                sparse = TRUE,
                data = dat)

vif.rma(vif_2)

Log_area  Number_pattern  Treatment_stimuluseyespot  Type_preyreal 
  1.1200          1.1937                     1.0961         1.2011 

It looks good. We will use this model.

6 Meta-regressions: multi-moderator

Include all moderators in the model.

Code
# Multi-moderator meta-regression with the retained moderator set
# (stimulus type, log area, number of patterns, prey type).
mr_all <- rma.mv(yi = lnRR,
                V = VCV, 
                mods = ~ Treatment_stimulus + Log_area 
                + Number_pattern + Type_prey,
                random = list(~1 | Study_ID,
                              ~1 | Obs_ID),
                test = "t",
                method = "REML", 
                sparse = TRUE,
                data = dat)

summary(mr_all)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-240.5439   481.0877   495.0877   519.9584   495.5357   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0555  0.2355     32     no  Study_ID 
sigma^2.2  0.2342  0.4840    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 258) = 2592.2639, p-val < .0001

Test of Moderators (coefficients 2:5):
F(df1 = 4, df2 = 258) = 2.6696, p-val = 0.0327

Model Results:

                           estimate      se     tval   df    pval    ci.lb 
intrcpt                     -0.0883  0.2151  -0.4104  258  0.6818  -0.5117 
Treatment_stimuluseyespot    0.0103  0.1144   0.0897  258  0.9286  -0.2150 
Log_area                     0.0979  0.0457   2.1410  258  0.0332   0.0079 
Number_pattern              -0.0504  0.0300  -1.6802  258  0.0941  -0.1095 
Type_preyreal                0.1924  0.1438   1.3379  258  0.1821  -0.0908 
                            ci.ub    
intrcpt                    0.3352    
Treatment_stimuluseyespot  0.2355    
Log_area                   0.1879  * 
Number_pattern             0.0087  . 
Type_preyreal              0.4756    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
r2_ml(mr_all)
   R2_marginal R2_conditional 
    0.07939435     0.25570867 

Only include moderators related to conspicuousness (pattern area and number of patterns) in the model.

Code
# Meta-regression with only the conspicuousness moderators
# (pattern area and number of patterns).
mr_cons <- rma.mv(yi = lnRR,
                V = VCV, 
                mods = ~ Log_area + Number_pattern,
                random = list(~1 | Study_ID,
                              ~1 | Obs_ID),
                test = "t",
                method = "REML", 
                sparse = TRUE,
                data = dat)

summary(mr_cons)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-242.9378   485.8757   495.8757   513.6791   496.1119   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0466  0.2158     32     no  Study_ID 
sigma^2.2  0.2360  0.4858    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 260) = 2679.1464, p-val < .0001

Test of Moderators (coefficients 2:3):
F(df1 = 2, df2 = 260) = 4.3867, p-val = 0.0134

Model Results:

                estimate      se     tval   df    pval    ci.lb   ci.ub    
intrcpt          -0.0218  0.1969  -0.1109  260  0.9118  -0.4095  0.3659    
Log_area          0.0908  0.0438   2.0735  260  0.0391   0.0046  0.1770  * 
Number_pattern   -0.0394  0.0287  -1.3742  260  0.1706  -0.0960  0.0171    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Marginal and conditional R2 for the conspicuousness-only model.
r2_ml(mr_cons)
   R2_marginal R2_conditional 
    0.07553592     0.22788272 
Code
# Bubble plot of effect size against log-transformed pattern area,
# conditional on the mr_cons meta-regression fit.
bubble_plot(mr_cons,
            mod = "Log_area",
            group = "Study_ID",
            xlab = "Log-transformed area")

Code
# As mr_cons, but additionally controlling for stimulus type
# (eyespot vs. other conspicuous pattern).
mr_cons_1 <- rma.mv(yi = lnRR,
                V = VCV, 
                mods = ~ Log_area + Number_pattern + Treatment_stimulus,
                random = list(~1 | Study_ID,
                              ~1 | Obs_ID),
                test = "t",
                method = "REML", 
                sparse = TRUE,
                data = dat)

summary(mr_cons_1)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-242.1568   484.3135   496.3135   517.6545   496.6469   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0513  0.2264     32     no  Study_ID 
sigma^2.2  0.2361  0.4859    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 259) = 2678.9397, p-val < .0001

Test of Moderators (coefficients 2:4):
F(df1 = 3, df2 = 259) = 2.9683, p-val = 0.0325

Model Results:

                           estimate      se     tval   df    pval    ci.lb 
intrcpt                     -0.0563  0.2109  -0.2668  259  0.7899  -0.4715 
Log_area                     0.0914  0.0448   2.0403  259  0.0423   0.0032 
Number_pattern              -0.0404  0.0290  -1.3949  259  0.1642  -0.0975 
Treatment_stimuluseyespot    0.0517  0.1085   0.4764  259  0.6342  -0.1620 
                            ci.ub    
intrcpt                    0.3590    
Log_area                   0.1796  * 
Number_pattern             0.0166    
Treatment_stimuluseyespot  0.2653    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Marginal and conditional R2 for the conspicuousness + stimulus-type model.
r2_ml(mr_cons_1)
   R2_marginal R2_conditional 
    0.07948479     0.24369201 
Code
# Meta-regression with stimulus type and prey type (real vs. artificial)
# as categorical moderators.
mr_pre <- rma.mv(yi = lnRR,
                V = VCV, 
                mods = ~ Treatment_stimulus + Type_prey,
                random = list(~1 | Study_ID,
                              ~1 | Obs_ID),
                test = "t",
                method = "REML", 
                sparse = TRUE,
                data = dat)

summary(mr_pre)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-246.8556   493.7111   503.7111   521.5145   503.9474   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0608  0.2466     32     no  Study_ID 
sigma^2.2  0.2431  0.4930    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 260) = 2667.6509, p-val < .0001

Test of Moderators (coefficients 2:3):
F(df1 = 2, df2 = 260) = 0.2089, p-val = 0.8116

Model Results:

                           estimate      se    tval   df    pval    ci.lb 
intrcpt                      0.1650  0.1037  1.5906  260  0.1129  -0.0393 
Treatment_stimuluseyespot    0.0300  0.1171  0.2562  260  0.7980  -0.2005 
Type_preyreal                0.0699  0.1412  0.4947  260  0.6212  -0.2082 
                            ci.ub    
intrcpt                    0.3693    
Treatment_stimuluseyespot  0.2605    
Type_preyreal              0.3480    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Marginal and conditional R2 for the stimulus-type + prey-type model.
r2_ml(mr_pre)
   R2_marginal R2_conditional 
    0.00415544     0.20344953 

7 Publication bias

Code
# Funnel plot on the standard-error scale. With yaxis = "sei" metafor plots
# the standard error on the y-axis, so the label must say standard error —
# the previous "Precision (inverse of SE)" label was wrong for this plot.
funnel(ma_all, yaxis = "sei", 
      xlab = "Standardised residuals",
      ylab = "Standard error")

# Funnel plot on the inverse-standard-error (precision) scale
# (spelling of "Standardised" also made consistent across the two plots).
f2 <- funnel(ma_all, yaxis = "seinv", 
      xlab = "Standardised residuals",
      ylab = "Precision (inverse of SE)",  col = c(alpha("orange", 0.5)))

Figure 7— Effect size and its standard error

Figure 8— Effect size and its inverse standard error

Figure 9— Funnel plot of effect size and its standard error

Code
# Egger's-style regression test for small-study effects: regress lnRR on the
# square root of the inverse "effective" sample size, sqrt((Cn+Tn)/(Cn*Tn)).
df_bias <- dat %>% mutate(sqrt_inv_e_n = sqrt((Cn + Tn)/(Cn * Tn)))

# NOTE(review): this model uses the vector of sampling variances (lnRR_var)
# rather than the VCV matrix used by the other models — presumably deliberate
# for the publication-bias test; confirm against the analysis protocol.
bias_model <- rma.mv(yi = lnRR,
                      V = lnRR_var, 
                      mods = ~ 1 + sqrt_inv_e_n,
                      random = list(~1 | Study_ID,
                                    ~1 | Obs_ID),
                      test = "t",
                      method = "REML", 
                      sparse = TRUE,
                      data = df_bias)

summary(bias_model)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-246.1777   492.3554   500.3554   514.6135   500.5116   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0960  0.3099     32     no  Study_ID 
sigma^2.2  0.2168  0.4656    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 2152.6798, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 0.0084, p-val = 0.9269

Model Results:

              estimate      se     tval   df    pval    ci.lb   ci.ub    
intrcpt         0.2426  0.1382   1.7550  261  0.0804  -0.0296  0.5148  . 
sqrt_inv_e_n   -0.0357  0.3884  -0.0918  261  0.9269  -0.8004  0.7291    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Visualise the Egger's regression (bias_model) as a bubble plot:
# effect size against the small-study-effect predictor.
bubble_plot(bias_model,
            mod = "sqrt_inv_e_n",
            group = "Study_ID",
            xlab = "Square root of inverse of effective sample size")

Figure 10— Egger’s test of publication bias
Code
# Decline-effect (time-lag bias) test: publication year as the moderator.
# df_bias is dat plus one extra column, so its rows align with VCV.
year_model <- rma.mv(yi = lnRR,
                      V = VCV, 
                      mods = ~  1 + Year,
                      random = list(~1 | Study_ID,
                                    ~1 | Obs_ID),
                      test = "t",
                      method = "REML", 
                      sparse = TRUE,
                      data = df_bias)

summary(year_model)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-247.6851   495.3701   503.3701   517.6282   503.5264   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0525  0.2291     32     no  Study_ID 
sigma^2.2  0.2437  0.4937    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 2653.9609, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 0.0059, p-val = 0.9389

Model Results:

         estimate       se     tval   df    pval     ci.lb    ci.ub    
intrcpt    1.2075  13.0311   0.0927  261  0.9262  -24.4519  26.8670    
Year      -0.0005   0.0065  -0.0767  261  0.9389   -0.0133   0.0123    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Visualise the decline-effect regression: effect size against
# publication year.
bubble_plot(year_model,
            mod = "Year",
            group = "Study_ID",
            xlab = "Year of publication")

Figure 11— Decline effect

8 Additional analyses

We compared effect size between two datasets (predator and prey) to see whether there is any difference in effect size between two datasets.

Code
# Additional analysis: test whether effect sizes differ between the two
# datasets (predator responses vs. prey survival).
mr_dataset <- rma.mv(yi = lnRR,
                      V = VCV,
                      mods = ~ Dataset,
                      random = list(~1 | Study_ID,
                                    ~1 | Obs_ID),
                      test = "t",
                      method = "REML",
                      sparse = TRUE,
                      data = dat)

summary(mr_dataset)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-247.3783   494.7565   502.7565   517.0146   502.9128   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0525  0.2291     32     no  Study_ID 
sigma^2.2  0.2429  0.4928    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 2649.6615, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 0.6243, p-val = 0.4302

Model Results:

             estimate      se    tval   df    pval    ci.lb   ci.ub    
intrcpt        0.1575  0.0885  1.7798  261  0.0763  -0.0167  0.3316  . 
Datasetprey    0.0944  0.1195  0.7901  261  0.4302  -0.1409  0.3297    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Orchard plot of effect sizes by dataset (predator vs. prey).
orchard_plot(mr_dataset,
            mod = "Dataset",
            group = "Study_ID",
            xlab = "Dataset", 
            angle = 45) 

Figure 12— Effect size and dataset - predator and prey
Code
# Additional analysis: log-transformed background area as a
# continuous moderator.
mr_background <- rma.mv(yi = lnRR,
                        V = VCV, 
                        mods = ~ Log_background,
                        random = list(~1 | Study_ID,
                                      ~1 | Obs_ID),
                        test = "t",
                        method = "REML",
                        sparse = TRUE,
                        data = dat)

summary(mr_background)

Multivariate Meta-Analysis Model (k = 263; method: REML)

   logLik   Deviance        AIC        BIC       AICc   
-247.3566   494.7131   502.7131   516.9712   502.8694   

Variance Components:

            estim    sqrt  nlvls  fixed    factor 
sigma^2.1  0.0559  0.2364     32     no  Study_ID 
sigma^2.2  0.2429  0.4929    263     no    Obs_ID 

Test for Residual Heterogeneity:
QE(df = 261) = 2660.5568, p-val < .0001

Test of Moderators (coefficient 2):
F(df1 = 1, df2 = 261) = 0.2498, p-val = 0.6176

Model Results:

                estimate      se     tval   df    pval    ci.lb   ci.ub    
intrcpt           0.4218  0.4312   0.9782  261  0.3289  -0.4273  1.2709    
Log_background   -0.0306  0.0613  -0.4998  261  0.6176  -0.1513  0.0900    

---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Code
# Bubble plot of effect size against log-transformed background area,
# conditional on the mr_background fit. Axis-label typo fixed
# ("backgroud" -> "background").
bubble_plot(mr_background,
            mod = "Log_background",
            group = "Study_ID",
            xlab = "Log-transformed background")

Figure 13— Effect size and background area

9 Figure gallery

Code
# Figure 1 assembly: three orchard plots combined with patchwork.
# mr_eyespot and mr_prey_type are fitted earlier in the document.

# overall effect
p1 <- orchard_plot(ma_all,
                    group = "Study_ID",
                    xlab = "log response ratio (lnRR)", 
                    angle = 45,
                    legend.pos = "bottom.right") + 
                    scale_x_discrete(labels = c("Overall effect")) + 
                    theme(legend.direction = "horizontal",
                    legend.text = element_text(size = 6)) +
                    
                    # Okabe-Ito palette: colour-blind-friendly colours
                    scale_colour_okabe_ito()+
                    scale_fill_okabe_ito()

#eyespot vs. conspicuous patterns
p2 <- orchard_plot(mr_eyespot,
                  mod = "Treatment_stimulus",
                  group = "Study_ID",
                  xlab = "log response ratio (lnRR)",
                  angle = 45,
                  legend.pos = "bottom.right") +
                  theme(legend.direction = "horizontal",
                  legend.text = element_text(size = 6)) +
                  # order = 2:3 picks different palette colours than p1
                  scale_colour_okabe_ito(order = 2:3)+
                  scale_fill_okabe_ito(order = 2:3)
                  
# prey type - real vs. artificial
p3 <- orchard_plot(mr_prey_type,
                  mod = "Type_prey",
                  group = "Study_ID",
                  xlab = "log response ratio (lnRR)",
                  angle = 45,
                  legend.pos = "bottom.right") +
                  theme(legend.direction = "horizontal",
                  legend.text = element_text(size = 6))+ 
                  scale_colour_okabe_ito(order = 4:5) +
                  scale_fill_okabe_ito(order = 4:5)

# combine: p1 on the left, p2 over p3 on the right; panels tagged a, b, c
patch1 <- p1 | (p2 / p3)
patch1 + plot_annotation(tag_levels = "a")

# output figure as pdf
# ggsave("fig1.pdf", width = 10, height = 5, dpi = 450)

Figure 14— Discrete moderators
Code
# these figs were created by multi meta-regression model results
# log-transformed area
p4 <- bubble_plot(mr_cons,
                  mod = "Log_area",
                  group = "Study_ID",
                  xlab = "Log-transformed area") +
                  scale_x_continuous(breaks = seq(0,7,1.5)) +
                  scale_y_continuous(breaks = seq(-2.5, 4.5, 1.5))

# number of patterns
p5 <- bubble_plot(mr_cons,
            mod = "Number_pattern",
            group = "Study_ID",
            xlab = "Number of patterns") +
                  scale_x_continuous(breaks = seq(0,11,1.5)) +
                  scale_y_continuous(breaks = seq(-2.5, 4.5, 1.5))

# combine
p4 / p5 + plot_annotation(tag_levels = "a")

# output figure as pdf
# ggsave("fig2_multi.pdf", width = 10, height = 10, dpi = 450)

Figure 15— Continuous moderators
Code
# Figures 3-4 assembly: publication-bias plots.
# Typo fixed in the funnel x-axis label ("Standarised" -> "Standardised"),
# matching the label used in the earlier precision-scale funnel plot.

# funnel plot

# pdf("fig3.pdf")

funnel(ma_all, yaxis = "seinv", 
      xlab = "Standardised residuals",
      ylab = "Precision (inverse of SE)",
      xlim = c(-4.0, 4.5), 
      ylim = c(0.01, 80.0), 
      col = c(alpha("mediumvioletred", 0.5)),
      cex = 0.7)

# dev.off()

# Egger's test and decline effect
p7 <- bubble_plot(bias_model,
            mod = "sqrt_inv_e_n",
            group = "Study_ID",
            xlab = "Square root of inverse of effective sample size") +
            scale_y_continuous(breaks = seq(-2.5, 4.5, 1.5)) 

p8 <- bubble_plot(year_model,
            mod = "Year",
            group = "Study_ID",
            xlab = "Year of publication") +  scale_y_continuous(breaks = seq(-2.5, 4.0, 1.5)) 

# combine plots created by bubble_plot()
pub <- p7 / p8
pub +  plot_annotation(tag_levels = 'a') 

# output figure as pdf
# ggsave("fig4.pdf", width = 10, height = 10, dpi = 450)

Figure 16— Funnel plot

Figure 17— Egger’s test and Decline effect

Figure 18— Publication bias

10 R session information

R version 4.3.1 (2023-06-16)

Platform: aarch64-apple-darwin20 (64-bit)

locale: en_US.UTF-8||en_US.UTF-8||en_US.UTF-8||C||en_US.UTF-8||en_US.UTF-8

attached base packages: parallel, stats, graphics, grDevices, utils, datasets, methods and base

other attached packages: orchaRd(v.2.0), miWQS(v.0.4.4), metafor(v.4.2-0), numDeriv(v.2016.8-1.1), metadat(v.1.2-0), Matrix(v.1.6-1), patchwork(v.1.1.3), lubridate(v.1.9.2), forcats(v.1.0.0), stringr(v.1.5.0), dplyr(v.1.1.3), purrr(v.1.0.2), readr(v.2.1.4), tidyr(v.1.3.0), tibble(v.3.2.1), tidyverse(v.2.0.0), knitr(v.1.43), here(v.1.0.1), ggcorrplot(v.0.1.4), ggplot2(v.3.4.3), ggokabeito(v.0.1.0), DT(v.0.29), GoodmanKruskal(v.0.0.3) and ape(v.5.7-1)

loaded via a namespace (and not attached): gridExtra(v.2.3), glm2(v.1.2.1), sandwich(v.3.0-2), rlang(v.1.1.1), magrittr(v.2.0.3), multcomp(v.1.4-25), compiler(v.4.3.1), mgcv(v.1.8-42), reshape2(v.1.4.4), vctrs(v.0.6.3), quantreg(v.5.97), pkgconfig(v.2.0.3), fastmap(v.1.1.1), backports(v.1.4.1), mcmc(v.0.9-7), ellipsis(v.0.3.2), labeling(v.0.4.3), pander(v.0.6.5), utf8(v.1.2.3), rmarkdown(v.2.24), tzdb(v.0.4.0), ggbeeswarm(v.0.7.2), MatrixModels(v.0.5-2), xfun(v.0.40), cachem(v.1.0.8), jsonlite(v.1.8.7), gmm(v.1.8), cluster(v.2.1.4), R6(v.2.5.1), bslib(v.0.5.1), stringi(v.1.7.12), Rsolnp(v.1.16), rlist(v.0.4.6.2), rpart(v.4.1.19), jquerylib(v.0.1.4), estimability(v.1.4.1), Rcpp(v.1.0.11), zoo(v.1.8-12), tmvmixnorm(v.1.1.1), base64enc(v.0.1-3), pacman(v.0.5.1), splines(v.4.3.1), nnet(v.7.3-19), timechange(v.0.2.0), tidyselect(v.1.2.0), rstudioapi(v.0.15.0), yaml(v.2.3.7), codetools(v.0.2-19), plyr(v.1.8.8), lattice(v.0.21-8), withr(v.2.5.0), coda(v.0.19-4), evaluate(v.0.21), tmvtnorm(v.1.5), foreign(v.0.8-84), survival(v.3.5-5), pillar(v.1.9.0), corrplot(v.0.92), checkmate(v.2.2.0), stats4(v.4.3.1), generics(v.0.1.3), rprojroot(v.2.0.3), invgamma(v.1.1), mathjaxr(v.1.6-0), truncnorm(v.1.0-9), hms(v.1.1.3), munsell(v.0.5.0), scales(v.1.2.1), xtable(v.1.8-4), glue(v.1.6.2), emmeans(v.1.8.8), Hmisc(v.5.1-0), tools(v.4.3.1), data.table(v.1.14.8), SparseM(v.1.81), matrixNormal(v.0.1.1), mvtnorm(v.1.2-3), grid(v.4.3.1), MCMCpack(v.1.6-3), crosstalk(v.1.2.0), colorspace(v.2.1-0), nlme(v.3.1-162), beeswarm(v.0.4.0), condMVNorm(v.2020.1), vipor(v.0.4.5), htmlTable(v.2.4.1), latex2exp(v.0.9.6), Formula(v.1.2-5), cli(v.3.6.1), fansi(v.1.0.4), gtable(v.0.3.4), sass(v.0.4.7), digest(v.0.6.33), TH.data(v.1.1-2), farver(v.2.1.1), htmlwidgets(v.1.6.2), htmltools(v.0.5.6), lifecycle(v.1.0.3) and MASS(v.7.3-60)